From 9b83ae02b774e13d3aa218e747fa5cbeef505d0b Mon Sep 17 00:00:00 2001
From: "kaf24@freefall.cl.cam.ac.uk" <kaf24@freefall.cl.cam.ac.uk>
Date: Fri, 27 Aug 2004 16:01:06 +0000
Subject: [PATCH] bitkeeper revision 1.1159.62.1 (412f5ac2sXlMhMeRveH20BE_o6ZJVw)

Allow shadow p.t. code to do unsafe things with shadow locks held.
---
 xen/arch/x86/shadow.c        | 14 +++++++-------
 xen/common/kernel.c          | 16 ++++++++++++----
 xen/include/asm-x86/shadow.h | 21 +++++++++++----------
 xen/include/xen/spinlock.h   | 11 +++++++++++
 4 files changed, 41 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index e1fac065d0..672f48ef9f 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -443,7 +443,7 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
     domain_pause(d);
     synchronise_pagetables(~0UL);
 
-    spin_lock(&d->mm.shadow_lock);
+    shadow_lock(&d->mm);
 
     if ( cmd == DOM0_SHADOW_CONTROL_OP_OFF )
     {
@@ -470,7 +470,7 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
         rc = -EINVAL;
     }
 
-    spin_unlock(&d->mm.shadow_lock);
+    shadow_unlock(&d->mm);
 
     domain_unpause(d);
 
@@ -620,19 +620,19 @@ int shadow_fault( unsigned long va, long error_code )
 
     // take the lock and reread gpte
 
-    spin_lock(&current->mm.shadow_lock);
+    shadow_lock(m);
 
     if ( unlikely(__get_user(gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
     {
         SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
-        spin_unlock(&m->shadow_lock);
+        shadow_unlock(m);
         return 0;  // propagate to guest
     }
 
     if ( unlikely(!(gpte & _PAGE_PRESENT)) )
     {
         SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
-        spin_unlock(&m->shadow_lock);
+        shadow_unlock(m);
         return 0; // we're not going to be able to help
     }
 
@@ -645,7 +645,7 @@ int shadow_fault( unsigned long va, long error_code )
     else
     {   // write fault on RO page
         SH_VVLOG("shadow_fault - EXIT: write fault on RO page (%lx)",gpte );
-        spin_unlock(&m->shadow_lock);
+        shadow_unlock(m);
         return 0; // propagate to guest
         // not clear whether we should set accessed bit here...
     }
@@ -737,7 +737,7 @@ int shadow_fault( unsigned long va, long error_code )
 
     check_pagetable( current, current->mm.pagetable, "post-sf" );
 
-    spin_unlock(&m->shadow_lock);
+    shadow_unlock(m);
 
     return 1; // let's try the faulting instruction again...
 
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 6d70547321..2c67cf7506 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -396,12 +396,20 @@ long do_ni_hypercall(void)
 
 #ifndef NDEBUG
 
-static int crit_count[NR_CPUS];
-static int crit_checking = 1;
+static int crit_count[NR_CPUS], crit_checking_disabled[NR_CPUS];
 
 void disable_criticalregion_checking(void)
 {
-    crit_checking = 0;
+    int cpu = smp_processor_id();
+    ASSERT(crit_checking_disabled[cpu] >= 0);
+    crit_checking_disabled[cpu]++;
+}
+
+void enable_criticalregion_checking(void)
+{
+    int cpu = smp_processor_id();
+    crit_checking_disabled[cpu]--;
+    ASSERT(crit_checking_disabled[cpu] >= 0);
 }
 
 void criticalregion_enter(void)
@@ -421,7 +429,7 @@ void criticalregion_exit(void)
 void ASSERT_no_criticalregion(void)
 {
     int cpu = smp_processor_id();
-    if ( (crit_count[cpu] == 0) || !crit_checking )
+    if ( (crit_count[cpu] == 0) || crit_checking_disabled[cpu] )
         return;
     disable_criticalregion_checking();
     ASSERT(crit_count[cpu] >= 0); /* -ve count is a special kind of bogus! */
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index f874b6be4a..5a1994cabb 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -26,6 +26,8 @@
 
 #define shadow_mode(_d)      ((_d)->mm.shadow_mode)
 #define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock)
+#define shadow_lock(_m)      spin_lock_nochecking(&(_m)->shadow_lock)
+#define shadow_unlock(_m)    spin_unlock_nochecking(&(_m)->shadow_lock)
 
 extern void shadow_mode_init(void);
 extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
@@ -134,9 +136,9 @@ static inline int mark_dirty( struct mm_struct *m, unsigned int mfn )
     int rc;
     ASSERT(local_irq_is_enabled());
     //if(spin_is_locked(&m->shadow_lock)) printk("+");
-    spin_lock(&m->shadow_lock);
+    shadow_lock(m);
     rc = __mark_dirty( m, mfn );
-    spin_unlock(&m->shadow_lock);
+    shadow_unlock(m);
     return rc;
 }
 
@@ -388,20 +390,21 @@ static inline unsigned long get_shadow_status( struct mm_struct *m,
 
     ASSERT(local_irq_is_enabled());
     //if(spin_is_locked(&m->shadow_lock)) printk("*");
-    spin_lock(&m->shadow_lock);
+    shadow_lock(m);
 
     if( m->shadow_mode == SHM_logdirty )
         __mark_dirty( m, gpfn );
 
     res = __shadow_status( m, gpfn );
 
-    if (!res) spin_unlock(&m->shadow_lock);
+    if (!res)
+        shadow_unlock(m);
 
     return res;
 }
 
 static inline void put_shadow_status( struct mm_struct *m )
 {
-    spin_unlock(&m->shadow_lock);
+    shadow_unlock(m);
 }
 
@@ -583,11 +586,9 @@ static inline void shadow_mk_pagetable( struct mm_struct *mm )
     if ( unlikely(mm->shadow_mode) )
     {
         ASSERT(local_irq_is_enabled());
-        spin_lock(&mm->shadow_lock);
-
-        __shadow_mk_pagetable( mm );
-
-        spin_unlock(&mm->shadow_lock);
+        shadow_lock(mm);
+        __shadow_mk_pagetable(mm);
+        shadow_unlock(mm);
     }
 
     SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d ) sh=%08lx",
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 0b4503d999..fc2409f13d 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -78,6 +78,7 @@ extern void criticalregion_enter(void);
 extern void criticalregion_exit(void);
 extern void ASSERT_no_criticalregion(void);
 extern void disable_criticalregion_checking(void);
+extern void enable_criticalregion_checking(void);
 
 #define spin_lock(_lock) \
     do { criticalregion_enter(); _raw_spin_lock(_lock); } while (0)
@@ -111,6 +112,7 @@ static inline int spin_trylock(spinlock_t *lock)
 
 #define ASSERT_no_criticalregion()        ((void)0)
 #define disable_criticalregion_checking() ((void)0)
+#define enable_criticalregion_checking()  ((void)0)
 
 #define spin_lock(_lock)    _raw_spin_lock(_lock)
 #define spin_trylock(_lock) _raw_spin_trylock(_lock)
@@ -124,4 +126,13 @@ static inline int spin_trylock(spinlock_t *lock)
 
 #endif
 
+/*
+ * Use these if you have taken special care to ensure that certain unsafe
+ * things can occur in your critical region (e.g., faults, user-space
+ * accesses).
+ */
+#define spin_lock_nochecking(_lock)    _raw_spin_lock(_lock)
+#define spin_trylock_nochecking(_lock) _raw_spin_trylock(_lock)
+#define spin_unlock_nochecking(_lock)  _raw_spin_unlock(_lock)
+
 #endif /* __SPINLOCK_H__ */
-- 
2.30.2
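
For illustration only, below is a minimal, standalone C sketch (not Xen code) of the locking
discipline the patch introduces: ordinary spin_lock/spin_unlock participate in the debug
critical-region accounting, which forbids "unsafe" operations (faults, user-space accesses)
while such a lock is held; the shadow page-table code instead takes its lock through the
*_nochecking wrappers, and the old global crit_checking flag becomes a per-CPU disable count
so disable/enable pairs nest. The toy spinlock, the single-CPU smp_processor_id() stand-in,
the simplified ASSERT_no_criticalregion(), and main() are assumptions added so the sketch
compiles on its own.

#include <assert.h>
#include <stdio.h>

#define NR_CPUS 1
static int smp_processor_id(void) { return 0; }      /* single-CPU stand-in */

/* Toy "raw" lock: a plain flag, no atomics (illustration only). */
typedef struct { int held; } spinlock_t;
static void _raw_spin_lock(spinlock_t *l)   { l->held = 1; }
static void _raw_spin_unlock(spinlock_t *l) { l->held = 0; }

/* Debug accounting, mirroring xen/common/kernel.c after the patch:
 * crit_count tracks nesting of checked critical regions; checking is
 * suppressed per CPU by a nestable disable count. */
static int crit_count[NR_CPUS], crit_checking_disabled[NR_CPUS];

static void criticalregion_enter(void) { crit_count[smp_processor_id()]++; }
static void criticalregion_exit(void)  { crit_count[smp_processor_id()]--; }
static void disable_criticalregion_checking(void)
{
    crit_checking_disabled[smp_processor_id()]++;
}
static void enable_criticalregion_checking(void)
{
    crit_checking_disabled[smp_processor_id()]--;
}

/* Fails if called inside a *checked* critical region, unless checking is
 * currently disabled on this CPU (simplified from the Xen version). */
static void ASSERT_no_criticalregion(void)
{
    int cpu = smp_processor_id();
    if ( (crit_count[cpu] == 0) || crit_checking_disabled[cpu] )
        return;
    assert(0 && "unsafe operation inside a checked critical region");
}

/* Checked locks participate in the accounting ... */
#define spin_lock(l)   do { criticalregion_enter(); _raw_spin_lock(l); } while (0)
#define spin_unlock(l) do { _raw_spin_unlock(l); criticalregion_exit(); } while (0)

/* ... while the nochecking variants deliberately do not; these are what
 * shadow_lock()/shadow_unlock() expand to in the patch. */
#define spin_lock_nochecking(l)   _raw_spin_lock(l)
#define spin_unlock_nochecking(l) _raw_spin_unlock(l)

int main(void)
{
    spinlock_t ordinary = { 0 }, shadow = { 0 };

    spin_lock(&ordinary);
    /* ASSERT_no_criticalregion() would trip here, unless checking is
     * explicitly disabled (and the disable count nests): */
    disable_criticalregion_checking();
    ASSERT_no_criticalregion();       /* suppressed while disabled */
    enable_criticalregion_checking();
    spin_unlock(&ordinary);

    spin_lock_nochecking(&shadow);    /* like shadow_lock(m) */
    ASSERT_no_criticalregion();       /* fine: not counted as a region */
    spin_unlock_nochecking(&shadow);  /* like shadow_unlock(m) */

    puts("ok");
    return 0;
}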